import cv2
from google.colab.patches import cv2_imshow
from tqdm import tqdm
import numpy as np
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
from sklearn.cluster import DBSCAN
from sklearn.metrics import pairwise_distances
# Path to the input video analysed throughout this script
# (relative to the working directory, e.g. /content on Colab).
video_path = 'DatasetC.mpg'
def Video_frame_diff_first_image(video_path, threshold=50):
    """Difference every frame of a video against its first frame.

    For each frame: save the grayscale frame as 'Original_Frame_<n>.jpg',
    compute the absolute difference against frame 0, binarise it
    (values above ``threshold`` -> 255, everything else -> 1), save the
    binarised image and display it.

    Args:
        video_path: path to a video readable by cv2.VideoCapture.
        threshold: intensity cut-off for binarising the difference image.
            Defaults to 50, the value used by the original analysis.
    """
    count = 0
    reference_frame = None
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        # Persist the raw grayscale frame; later cells re-load these files.
        cv2.imwrite(f'Original_Frame_{count}.jpg', image)
        # The very first frame is the static reference for every diff.
        if count == 0:
            reference_frame = image
        img_diff = cv2.absdiff(image, reference_frame)
        # Binarise: moving pixels -> 255, (near-)static pixels -> 1.
        img_diff[img_diff > threshold] = 255
        img_diff[img_diff <= threshold] = 1
        # NOTE: 'Threhold' typo kept so existing output paths stay unchanged.
        cv2.imwrite(f'/content/Threhold_image{count}.jpg', img_diff)
        cv2_imshow(img_diff)
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
        count += 1
    cap.release()
    cv2.destroyAllWindows()
# Run the first-frame differencing over the whole video, then inspect two
# sample frames (1 and 100) against the reference frame 0 by hand.
Video_frame_diff_first_image(video_path)

img_reference = cv2.imread('Original_Frame_0.jpg')

# One entry per comparison:
# (sample frame, raw-diff path, raw message, thresholded message, clipped path)
comparisons = [
    (cv2.imread('Original_Frame_1.jpg'),
     '/content/img_diff_1.jpg',
     'Frame Differene between Frame_0 and Frame_1',
     'Thresholding the Frame Differene between Frame_0 and Frame_1',
     '/content/img_diff_1_clip.jpg'),
    (cv2.imread('Original_Frame_100.jpg'),
     '/content/img_diff_100.jpg',
     'Frame Differene between Frame_0 and Frame_100',
     'Thresholding Frame Differene between Frame_0 and Frame_100',
     '/content/img_diff_100_clip.jpg'),
]

for sample, raw_path, raw_msg, clip_msg, clip_path in comparisons:
    diff = cv2.absdiff(img_reference, sample)
    cv2.imwrite(raw_path, diff)
    print(raw_msg)
    cv2_imshow(diff)
    # Binarise the difference: changed pixels -> 255, the rest -> 1.
    diff[diff > 50] = 255
    diff[diff <= 50] = 1
    print(clip_msg)
    cv2_imshow(diff)
    cv2.imwrite(clip_path, diff)
def video_path_consecutive_frame_diff(video_path, threshold=40):
    """Show the binarised difference between every pair of consecutive frames.

    For each frame after the first, the absolute difference against the
    previous frame is thresholded (above ``threshold`` -> 255, else -> 1),
    saved as '/content/Threhold_image_previous<n>.jpg' and displayed.

    Args:
        video_path: path to a video readable by cv2.VideoCapture.
        threshold: intensity cut-off for binarising the difference image.
            Defaults to 40, the value used by the original analysis.
    """
    count = 0
    previous_image = None
    # BUG FIX: the capture path was hardcoded to '/content/DatasetC.mpg',
    # silently ignoring the video_path argument.
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        if count >= 1:
            # Difference between the current frame and the previous one.
            img_diff = cv2.absdiff(image, previous_image)
            img_diff[img_diff > threshold] = 255
            img_diff[img_diff <= threshold] = 1
            path = f'/content/Threhold_image_previous{count}.jpg'
            # BUG FIX: the path was computed but the image was never saved,
            # contrary to the original "save it" comment.
            cv2.imwrite(path, img_diff)
            cv2_imshow(img_diff)
            if cv2.waitKey(1) & 0xFF == ord('q'):
                break
        count += 1
        previous_image = image
    cap.release()
    cv2.destroyAllWindows()
# Display consecutive-frame differences for the whole video, then examine
# two hand-picked consecutive pairs with per-pair thresholds.
video_path_consecutive_frame_diff(video_path)

# One entry per pair:
# (earlier path, later path, threshold, raw message, thresholded message, output path)
pairs = [
    ('/content/Original_Frame_20.jpg', '/content/Original_Frame_21.jpg', 20,
     'Frame Difference between Frame 21 and Frame 20',
     'Thresholding the Frame Difference between Frame 21 and Frame 20',
     '/content/new_diff.jpg'),
    ('/content/Original_Frame_120.jpg', '/content/Original_Frame_121.jpg', 30,
     'Frame Difference between Frame 120 and Frame 121',
     'Thresholding the Frame Difference between Frame 120 and Frame 121',
     '/content/new_diff_1.jpg'),
]

for earlier_path, later_path, thresh, raw_msg, clip_msg, out_path in pairs:
    earlier = cv2.imread(earlier_path)
    later = cv2.imread(later_path)
    pair_diff = cv2.absdiff(later, earlier)
    cv2.imwrite('/content/img_diff_1.jpg', pair_diff)
    print(raw_msg)
    cv2_imshow(pair_diff)
    # Binarise the difference: moving pixels -> 255, static pixels -> 1.
    pair_diff[pair_diff > thresh] = 255
    pair_diff[pair_diff <= thresh] = 1
    print(clip_msg)
    cv2_imshow(pair_diff)
    cv2.imwrite(out_path, pair_diff)
def ICV_background(video_path, decay=0.99):
    """Estimate the static background of a video via a temporal weighted average.

    Every frame is converted to grayscale and stacked along a third (time)
    axis; each pixel's background value is the weighted average of that
    pixel across time, with geometric weights decay**i (earlier frames
    weigh more). The result is shown and saved as 'background_image.jpg'.

    Args:
        video_path: path to a video readable by cv2.VideoCapture.
        decay: geometric weight decay factor per frame (default 0.99,
            the value used by the original analysis).
    """
    frames = []
    cap = cv2.VideoCapture(video_path)
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        frames.append(cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY))
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    # Stack once at the end: calling np.dstack inside the loop copied the
    # whole accumulated volume on every frame (quadratic memory traffic).
    output_video_vector = np.dstack(frames)
    # BUG FIX: the weight list was hardcoded to 140 entries, which makes
    # np.average raise whenever the video does not have exactly 140 frames.
    n_frames = output_video_vector.shape[2]
    factor = decay ** np.arange(n_frames)
    # Weighted average along the time axis, vectorised instead of the
    # original per-pixel double loop. np.floor reproduces the original
    # int(...) truncation exactly (the averages are non-negative).
    background_image = np.floor(
        np.average(output_video_vector, axis=2, weights=factor))
    cv2_imshow(background_image)
    cv2.imwrite('background_image.jpg', background_image)
# Build and save 'background_image.jpg'; ICV_Count_Objects reads it later.
ICV_background(video_path)
def ICV_Count_Objects(video_path):
    """Count the objects in a single frame by clustering foreground pixels.

    Subtracts the precomputed 'background_image.jpg' from the image at
    `video_path` (a still-image path, despite the parameter name),
    thresholds the difference, then runs DBSCAN on the coordinates of the
    foreground pixels. The epsilon for DBSCAN is estimated with an elbow
    search over sorted pairwise distances. One representative point per
    cluster is scattered on top of the frame with matplotlib.

    Returns:
        int: the number of distinct DBSCAN labels.
            NOTE(review): this includes the noise label -1 when present,
            so the count can be one higher than the true object count.

    Raises:
        IndexError: the elbow search below can index past the end of
            `distances_list`; ICV_VIDEO_objects deliberately catches this.
    """
    # read the Frame (flag 0 = grayscale)
    given_frame = cv2.imread(video_path,0)
    # read the Background Frame produced by ICV_background
    background_image = cv2.imread('background_image.jpg',0)
    # take the frame differencing between the frames
    frame_difference = cv2.absdiff(background_image[:,:],given_frame[:,:])
    cv2.imwrite('img_diff_1.jpg',frame_difference)
    # FIND THE THRESHOLD: binarise the difference (foreground -> 255, rest -> 1)
    frame_difference[frame_difference > 100] = 255
    frame_difference[frame_difference <= 100] = 1
    # np.array(np.unique(img_diff_21_20, return_counts=True)).T
    #cv2_imshow(frame_difference)
    img_1 = frame_difference[:,:]
    # store the (x,y) of all the points which has pixel value =255
    indices_max = np.where(img_1 == 255)
    # store the x-cordinate of pixel's (row indices)
    list_x = list(indices_max[0])
    # store the y-cordinates of pixel (column indices)
    list_y = list(indices_max[1])
    # store the pixels locations in a Dataframe
    df = pd.DataFrame()
    df['x']=list_x
    df['y'] = list_y
    # convert to array of (row, col) coordinate pairs
    df_array = np.array(df)
    df_array = df_array.astype('int32')
    # DBSCAN ALGORITHM
    # calculate the Distance between each point each all other points
    X_distances = pairwise_distances(df_array)
    # sort the points based on the Distance (each row ascending, in place)
    X_distances.sort()
    # Store the distance's and use the elbow Method to find the Threhold-Minimum Distance
    distances_list = []
    for i in range(X_distances.shape[0]):
        # keep only each point's 150 nearest-neighbour distances
        distances_eps=X_distances[i][:150]
        distances_list = distances_list + list(distances_eps)
    distances_list = sorted(distances_list)
    # '''IF needed plot the Graph to Find the Perfect Epsilon'''
    #distances_list = distances_list[:2000]
    #fig = plt.figure()
    #ax = fig.add_axes([0,0,1,1])
    #n = range(len(distances_list))
    #ax.plot(distances_list,n, label='inertia')
    #ax.legend()
    #plt.xlabel("Distance From 10 points")
    #plt.ylabel("No.of.Points from 10")
    #plt.title("Distance from 10 points (vs) No.of.points")
    #plt.show()
    # Now calculate the Minimum distance between two points which is a hyperparameter
    # It says Consider these points for Clustering if they above this distance
    # Epsilon = Minimum Distance
    # Loop the distance list to find the Perfect Epsilon
    for i in range(1,len(distances_list)):
        # take the Index or distance where the distance is constant according to Elbow Method
        # NOTE(review): i+1 / i+2 can index past the end of distances_list near
        # the final iterations, raising IndexError; callers rely on catching it.
        if (distances_list[i+1] - distances_list[i]) > (distances_list[i+2] - distances_list[i+1])+1:
            x = i-2
            break
    # NOTE(review): if the loop above never breaks, `x` is unbound here and the
    # next line raises NameError rather than IndexError — verify intent.
    # The Minimum Distance between objects should not be less than 4.2
    # if the Minimum Distance is Greater than 13 then Consider the Minimum Distance =20
    # NOTE(review): `< 4.2 and < 13` makes the second condition redundant;
    # the first branch likely intended a different bound — confirm.
    if distances_list[x] < 4.2 and distances_list[x] < 13:
        eps_threshold = 4.123
    elif distances_list[x] < 13 and distances_list[x] > 4.2:
        eps_threshold = distances_list[x]
    else:
        eps_threshold = 20
    # print the Threshold which is epsilon: the minimum distance between two objects in the video
    print(eps_threshold,distances_list[x])
    # RUN a DBSCAN Model using a Epsilon Value
    model_dbscan = DBSCAN(eps=eps_threshold-1, min_samples=3)
    # Fit the Extracted Positons of the array
    model_dbscan.fit(df_array)
    # This gives output saying which Cluster each point belong to
    labels = model_dbscan.labels_
    # Check the Labels which says How many Objects are present in the Frame
    list_labels = list(set(labels))
    # take the x and y cordinate of the one point in each Cluster
    x_cordinates = []
    y_cordinates = []
    # Now use the one point for in each Cluster for showing in the Image
    # Check where the object is present in the Image
    for i in range(len(list_labels)):
        # check for one point in the one Cluster (first member's row index)
        x = np.where(labels==list_labels[i])[0][0]
        # append the x-cordinate of the point in the CLuster (image column)
        x_cordinates.append(df_array[x][1])
        # append the y-cordinate of the point in the Cluster (image row)
        y_cordinates.append(df_array[x][0])
    # read the Image
    im = plt.imread(video_path)
    # plot the Image as background and use the Cluster points in the Image to scatter and point objects
    implot = plt.imshow(im)
    plt.scatter(x_cordinates,y_cordinates, c='r')
    plt.show()
    return len(list_labels)
# Count objects in three sample frames (files saved by the earlier passes).
ICV_Count_Objects('Original_Frame_0.jpg')
ICV_Count_Objects('Original_Frame_110.jpg')
ICV_Count_Objects('Original_Frame_133.jpg')
def ICV_VIDEO_objects(video_path):
    """Count the objects in every frame of a video.

    Each frame is converted to grayscale, saved as 'Original_Frame_<n>.jpg'
    and counted with ICV_Count_Objects. When counting fails with an
    IndexError (its elbow search can run off the end of its distance list),
    the previous frame's count is reused.

    Args:
        video_path: path to a video readable by cv2.VideoCapture.

    Returns:
        list: object count for each frame, in frame order.
    """
    count = 0
    cap = cv2.VideoCapture(video_path)
    # per-frame object counts, in order
    Number_Of_Objects = []
    while cap.isOpened():
        ret, frame = cap.read()
        if not ret:
            break
        image = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        path = f'Original_Frame_{count}.jpg'
        cv2.imwrite(path, image)
        try:
            total_objects_in_image = ICV_Count_Objects(path)
        except IndexError:
            # BUG FIX: Number_Of_Objects[-1] itself raised IndexError when
            # the very first frame failed (empty list); fall back to 0.
            total_objects_in_image = Number_Of_Objects[-1] if Number_Of_Objects else 0
        Number_Of_Objects.append(total_objects_in_image)
        print('Number of Objects in the Frame {}'.format(total_objects_in_image))
        count = count + 1
        if cv2.waitKey(1) & 0xFF == ord('q'):
            break
    cap.release()
    cv2.destroyAllWindows()
    return Number_Of_Objects
# Count objects in every frame of the video, then visualise the counts as a
# bar chart (saved to disk) and a line plot.
Objects_per_frame = ICV_VIDEO_objects('DatasetC.mpg')
# BUG FIX: the x-axis was hardcoded to range(140); matplotlib raises a
# length-mismatch error whenever the video is not exactly 140 frames, so
# derive the axis from the actual number of per-frame counts.
range_values = list(range(len(Objects_per_frame)))
plt.figure(figsize=(45, 10))
plt.bar(range_values, Objects_per_frame)
path = 'BarPlot_video_count_of_objects'
plt.savefig(path)
plt.figure(figsize=(45, 10))
plt.plot(range_values, Objects_per_frame)